From 99a37802489a7c6b560c633aa521eadf3113eb8d Mon Sep 17 00:00:00 2001 From: "kaf24@scramble.cl.cam.ac.uk" Date: Wed, 8 Oct 2003 14:15:08 +0000 Subject: [PATCH] bitkeeper revision 1.496 (3f841becdre1gQrbAv8C5pF-O_M1fg) sched.h, pgalloc.h, timer.c, schedule.c, domain.c, smp.c: Various cleanups. Nothing major. --- xen/arch/i386/smp.c | 103 +++++---------------------------- xen/common/domain.c | 1 - xen/common/schedule.c | 4 +- xen/common/timer.c | 35 ----------- xen/include/asm-i386/pgalloc.h | 11 ---- xen/include/xeno/sched.h | 17 +----- 6 files changed, 17 insertions(+), 154 deletions(-) diff --git a/xen/arch/i386/smp.c b/xen/arch/i386/smp.c index 3cae3ec1d8..baf88b5fba 100644 --- a/xen/arch/i386/smp.c +++ b/xen/arch/i386/smp.c @@ -55,54 +55,8 @@ * 7AP. We do not assume writes to the LVT deassering IRQs * 8AP. We do not enable low power mode (deep sleep) during MP bootup * 9AP. We do not use mixed mode - * - * Pentium - * There is a marginal case where REP MOVS on 100MHz SMP - * machines with B stepping processors can fail. XXX should provide - * an L1cache=Writethrough or L1cache=off option. - * - * B stepping CPUs may hang. There are hardware work arounds - * for this. We warn about it in case your board doesnt have the work - * arounds. Basically thats so I can tell anyone with a B stepping - * CPU and SMP problems "tough". - * - * Specific items [From Pentium Processor Specification Update] - * - * 1AP. Linux doesn't use remote read - * 2AP. Linux doesn't trust APIC errors - * 3AP. We work around this - * 4AP. Linux never generated 3 interrupts of the same priority - * to cause a lost local interrupt. - * 5AP. Remote read is never used - * 6AP. not affected - worked around in hardware - * 7AP. not affected - worked around in hardware - * 8AP. worked around in hardware - we get explicit CS errors if not - * 9AP. only 'noapic' mode affected. Might generate spurious - * interrupts, we log only the first one and count the - * rest silently. - * 10AP. 
not affected - worked around in hardware - * 11AP. Linux reads the APIC between writes to avoid this, as per - * the documentation. Make sure you preserve this as it affects - * the C stepping chips too. - * 12AP. not affected - worked around in hardware - * 13AP. not affected - worked around in hardware - * 14AP. we always deassert INIT during bootup - * 15AP. not affected - worked around in hardware - * 16AP. not affected - worked around in hardware - * 17AP. not affected - worked around in hardware - * 18AP. not affected - worked around in hardware - * 19AP. not affected - worked around in BIOS - * - * If this sounds worrying believe me these bugs are either ___RARE___, - * or are signal timing bugs worked around in hardware and there's - * about nothing of note with C stepping upwards. */ -/* The 'big kernel lock' */ -spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED; - -struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { 0 }}; - /* * the following functions deal with sending IPIs between CPUs. * @@ -244,6 +198,14 @@ static inline void send_IPI_all(int vector) __send_IPI_shortcut(APIC_DEST_ALLINC, vector); } +/* + * ********* XEN NOTICE ********** + * I've left the following comments lying around as they look like they might + * be useful to get multiprocessor guest OSes going. However, I suspect the + * issues we face will be quite different so I've ripped out all the + * TLBSTATE logic (I didn't understand it anyway :-). These comments do + * not apply to Xen, therefore! -- Keir (8th Oct 2003). + */ /* * Smarter SMP flushing macros. * c/o Linus Torvalds. @@ -252,28 +214,6 @@ static inline void send_IPI_all(int vector) * writing to user space from interrupts. (Its not allowed anyway). 
* * Optimizations Manfred Spraul - */ - -static volatile unsigned long flush_cpumask; -#if 0 -static struct mm_struct * flush_mm; -static unsigned long flush_va; -#endif -static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED; -#define FLUSH_ALL 0xffffffff - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm.cpu_vm_mask. - */ -static void inline leave_mm (unsigned long cpu) -{ - if (cpu_tlbstate[cpu].state == TLBSTATE_OK) - BUG(); - clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask); -} - -/* * * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] @@ -310,15 +250,17 @@ static void inline leave_mm (unsigned long cpu) * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. - */ - -/* + * * TLB flush IPI: * * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. */ +static volatile unsigned long flush_cpumask; +static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED; +#define FLUSH_ALL 0xffffffff + asmlinkage void smp_invalidate_interrupt (void) { unsigned long cpu = smp_processor_id(); @@ -326,19 +268,8 @@ asmlinkage void smp_invalidate_interrupt (void) if (!test_bit(cpu, &flush_cpumask)) return; -#if 0 - if (flush_mm == cpu_tlbstate[cpu].active_mm) { - if (cpu_tlbstate[cpu].state == TLBSTATE_OK) { - if (flush_va == FLUSH_ALL) -#endif - local_flush_tlb(); -#if 0 - else - __flush_tlb_one(flush_va); - } else - leave_mm(cpu); - } -#endif + local_flush_tlb(); + ack_APIC_irq(); clear_bit(cpu, &flush_cpumask); } @@ -354,11 +285,7 @@ void flush_tlb_others(unsigned long cpumask) static inline void do_flush_tlb_all_local(void) { - unsigned long cpu = smp_processor_id(); - __flush_tlb_all(); - if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY) - leave_mm(cpu); } static void flush_tlb_all_ipi(void* info) diff --git a/xen/common/domain.c b/xen/common/domain.c index 0cd37ec261..ab2ead683d 
100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -71,7 +71,6 @@ struct task_struct *do_newdomain(unsigned int dom_id, unsigned int cpu) SET_GDT_ADDRESS(p, DEFAULT_GDT_ADDRESS); p->addr_limit = USER_DS; - p->active_mm = &p->mm; /* * We're basically forcing default RPLs to 1, so that our "what privilege diff --git a/xen/common/schedule.c b/xen/common/schedule.c index ddc5d094a3..d95a287c93 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -470,7 +470,6 @@ asmlinkage void __enter_scheduler(void) if ( unlikely(prev == next) ) { /* We won't go through the normal tail, so do this by hand */ - prev->policy &= ~SCHED_YIELD; update_dom_time(prev->shared_info); return; } @@ -491,8 +490,7 @@ asmlinkage void __enter_scheduler(void) switch_to(prev, next); - prev->policy &= ~SCHED_YIELD; - if ( prev->state == TASK_DYING ) + if ( unlikely(prev->state == TASK_DYING) ) put_task_struct(prev); update_dom_time(next->shared_info); diff --git a/xen/common/timer.c b/xen/common/timer.c index ebc7f8d3b0..bdcda32d89 100644 --- a/xen/common/timer.c +++ b/xen/common/timer.c @@ -506,37 +506,6 @@ static void update_wall_time(unsigned long ticks) } } -static inline void do_process_times(struct task_struct *p, - unsigned long user, unsigned long system) -{ -} - - -void update_one_process(struct task_struct *p, unsigned long user, - unsigned long system, int cpu) -{ - do_process_times(p, user, system); -} - -/* - * Called from the timer interrupt handler to charge one tick to the current - * process. user_tick is 1 if the tick is user time, 0 for system. 
- */ -void update_process_times(int user_tick) -{ - struct task_struct *p = current; - int cpu = smp_processor_id(), system = user_tick ^ 1; - - update_one_process(p, user_tick, system, cpu); - - if ( --p->counter <= 0 ) - { - p->counter = 0; - set_bit(_HYP_EVENT_NEED_RESCHED, &p->hyp_events); - } -} - - /* jiffies at the most recent update of wall time */ unsigned long wall_jiffies; @@ -580,12 +549,8 @@ void timer_bh(void) void do_timer(struct pt_regs *regs) { - (*(unsigned long *)&jiffies)++; - if ( !using_apic_timer ) - update_process_times(user_mode(regs)); - mark_bh(TIMER_BH); if (TQ_ACTIVE(tq_timer)) mark_bh(TQUEUE_BH); diff --git a/xen/include/asm-i386/pgalloc.h b/xen/include/asm-i386/pgalloc.h index 7ccf9c1da9..841e5fd4a1 100644 --- a/xen/include/asm-i386/pgalloc.h +++ b/xen/include/asm-i386/pgalloc.h @@ -70,17 +70,6 @@ static inline void flush_tlb_cpu(unsigned int cpu) flush_tlb_others(1< struct mm_struct { - unsigned long cpu_vm_mask; /* * Every domain has a L1 pagetable of its own. Per-domain mappings * are put in this table (eg. the current GDT is mapped here). @@ -48,7 +47,6 @@ struct mm_struct { extern struct mm_struct init_mm; #define IDLE0_MM \ { \ - cpu_vm_mask: 0, \ perdomain_pt: 0, \ pagetable: mk_pagetable(__pa(idle_pg_table)) \ } @@ -105,13 +103,9 @@ struct task_struct unsigned int max_pages; /* max number of pages that can be possesed */ /* scheduling */ - struct list_head run_list; /* the run list */ + struct list_head run_list; int has_cpu; - int policy; - int counter; - struct ac_timer blt; /* blocked timeout */ - s_time_t lastschd; /* time this domain was last scheduled */ s_time_t cpu_time; /* total CPU time received till now */ s_time_t wokenup; /* time domain got woken up */ @@ -154,12 +148,6 @@ struct task_struct char name[MAX_DOMAIN_NAME]; - /* - * active_mm stays for now. It's entangled in the tricky TLB flushing - * stuff which I haven't addressed yet. It stays until I'm man enough - * to venture in. 
- */ - struct mm_struct *active_mm; struct thread_struct thread; struct task_struct *prev_task, *next_task, *next_hash; @@ -190,8 +178,6 @@ struct task_struct #define TASK_SUSPENDED 8 #define TASK_DYING 16 -#define SCHED_YIELD 0x10 - #include /* for KERNEL_DS */ #define IDLE0_TASK(_t) \ @@ -204,7 +190,6 @@ struct task_struct avt: 0xffffffff, \ mm: IDLE0_MM, \ addr_limit: KERNEL_DS, \ - active_mm: &idle0_task.mm, \ thread: INIT_THREAD, \ prev_task: &(_t), \ next_task: &(_t) \ -- 2.30.2